PART II B): German Credit Score Classification Model EXPLAINABILITY, BIAS & FAIRNESS (Gender as protected variable).

By: Krishna J

Importing necessary libraries

In [1]:
#!pip install --upgrade tensorflow==1.15.0
In [2]:
import pandas as pd
import numpy as np
import seaborn               as sns
import matplotlib.pyplot     as plt
from sklearn.model_selection import train_test_split
#from sklearn.ensemble        import RandomForestClassifier
#from sklearn.linear_model    import LogisticRegression
from sklearn.preprocessing   import MinMaxScaler, StandardScaler
from sklearn.base            import TransformerMixin
from sklearn.pipeline        import Pipeline, FeatureUnion
from typing                  import List, Union, Dict
# Warnings will be used to silence various model warnings for tidier output
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline 
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
np.random.seed(0)
#!pip install fairlearn
#!pip install aif360
#!pip install shap
#!pip install eli5
#!pip install BlackBoxAuditing

Importing source dataset

In [3]:
German_df = pd.read_csv('C:/Users/krish/Downloads/German-reduced_jp.csv')

print(German_df.shape)
print (German_df.columns)
(1000, 17)
Index(['Gender', 'Age', 'Marital_Status', 'CurrentAcc_None',
       'CurrentAcc_LT200', 'Savings_LT500', 'CreditHistory_none/paid',
       'Debtors_co-applicant', 'Job_unskilled-resident', 'NumMonths',
       'Telephone', 'Purpose_education', 'Purpose_furniture/equip',
       'CreditAmount', 'Foreignworker', 'Debtors_guarantor', 'CreditStatus'],
      dtype='object')
In [4]:
German_df.head()
Out[4]:
Gender Age Marital_Status CurrentAcc_None CurrentAcc_LT200 Savings_LT500 CreditHistory_none/paid Debtors_co-applicant Job_unskilled-resident NumMonths Telephone Purpose_education Purpose_furniture/equip CreditAmount Foreignworker Debtors_guarantor CreditStatus
0 1 1 1 0 1 0 0 0 0 6 1 0 0 0.050567 1 0 1
1 0 0 0 0 1 1 1 0 0 48 0 0 0 0.313690 1 0 0
2 1 1 1 1 0 1 0 0 1 12 0 1 0 0.101574 1 0 1
3 1 1 1 0 1 1 1 0 0 42 0 0 1 0.419941 1 1 1
4 1 1 1 0 1 1 0 0 0 24 0 0 0 0.254209 1 0 0
In [5]:
#feature_list = ['Gender','Age','Marital_Status','NumMonths','Savings_<500','Savings_none','Dependents','Property_rent','Job_management/self-emp/officer/highly qualif emp','Debtors_guarantor','Purpose_CarNew',                           'Purpose_furniture/equip','CreditHistory_none/paid','Purpose_CarUsed','CreditAmount','CreditStatus']
feature_list=['Gender', 'Age', 'Marital_Status', 'CurrentAcc_None',
       'CurrentAcc_LT200', 'Savings_LT500', 'CreditHistory_none/paid',
       'Debtors_co-applicant', 'Job_unskilled-resident', 'NumMonths',
       'Telephone', 'Purpose_education', 'Purpose_furniture/equip',
       'CreditAmount', 'Foreignworker', 'Debtors_guarantor', 'CreditStatus']
In [6]:
X = German_df.iloc[:, :-1]
y = German_df['CreditStatus']
X.head()
y.head()
Out[6]:
Gender Age Marital_Status CurrentAcc_None CurrentAcc_LT200 Savings_LT500 CreditHistory_none/paid Debtors_co-applicant Job_unskilled-resident NumMonths Telephone Purpose_education Purpose_furniture/equip CreditAmount Foreignworker Debtors_guarantor
0 1 1 1 0 1 0 0 0 0 6 1 0 0 0.050567 1 0
1 0 0 0 0 1 1 1 0 0 48 0 0 0 0.313690 1 0
2 1 1 1 1 0 1 0 0 1 12 0 1 0 0.101574 1 0
3 1 1 1 0 1 1 1 0 0 42 0 0 1 0.419941 1 1
4 1 1 1 0 1 1 0 0 0 24 0 0 0 0.254209 1 0
Out[6]:
0    1
1    0
2    1
3    1
4    0
Name: CreditStatus, dtype: int64

from imblearn.over_sampling import ADASYN from collections import Counter

ada = ADASYN(random_state=40) print('Original dataset shape {}'.format(Counter(y))) X_res, y_res = ada.fit_resample(X,y) print('Resampled dataset shape {}'.format(Counter(y_res)))

German_df=X = pd.DataFrame(np.column_stack((X_res, y_res)))

In [7]:
German_df.head()
Out[7]:
Gender Age Marital_Status CurrentAcc_None CurrentAcc_LT200 Savings_LT500 CreditHistory_none/paid Debtors_co-applicant Job_unskilled-resident NumMonths Telephone Purpose_education Purpose_furniture/equip CreditAmount Foreignworker Debtors_guarantor CreditStatus
0 1 1 1 0 1 0 0 0 0 6 1 0 0 0.050567 1 0 1
1 0 0 0 0 1 1 1 0 0 48 0 0 0 0.313690 1 0 0
2 1 1 1 1 0 1 0 0 1 12 0 1 0 0.101574 1 0 1
3 1 1 1 0 1 1 1 0 0 42 0 0 1 0.419941 1 1 1
4 1 1 1 0 1 1 0 0 0 24 0 0 0 0.254209 1 0 0

German_df.columns=feature_list German_df.head()

Metrics to calculate model fairness necessary libraries

In [8]:
from aif360.datasets import GermanDataset
from aif360.metrics import BinaryLabelDatasetMetric

def fair_metrics(fname, dataset, pred, pred_is_dataset=False):
    filename = fname
    if pred_is_dataset:
        dataset_pred = pred
    else:
        dataset_pred = dataset.copy()
        dataset_pred.labels = pred

    cols = ['Accuracy', 'F1', 'DI','SPD', 'EOD', 'AOD', 'ERD', 'CNT', 'TI']
    obj_fairness = [[1,1,1,0,0,0,0,1,0]]

    fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)

    for attr in dataset_pred.protected_attribute_names:
        idx = dataset_pred.protected_attribute_names.index(attr)
        privileged_groups =  [{attr:dataset_pred.privileged_protected_attributes[idx][0]}]
        unprivileged_groups = [{attr:dataset_pred.unprivileged_protected_attributes[idx][0]}]

        classified_metric = ClassificationMetric(dataset,
                                                     dataset_pred,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups)

        metric_pred = BinaryLabelDatasetMetric(dataset_pred,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups)

        distortion_metric = SampleDistortionMetric(dataset,
                                                     dataset_pred,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups)

        acc = classified_metric.accuracy()
        f1_sc = 2 * (classified_metric.precision() * classified_metric.recall()) / (classified_metric.precision() + classified_metric.recall())

        mt = [acc, f1_sc,
                        classified_metric.disparate_impact(),
                        classified_metric.mean_difference(),
                        classified_metric.equal_opportunity_difference(),
                        classified_metric.average_odds_difference(),
                        classified_metric.error_rate_difference(),
                        metric_pred.consistency(),
                        classified_metric.theil_index()
                    ]
        w_row = []
        print('Computing fairness of the model.')
        for i in mt:
            #print("%.8f"%i)
            w_row.append("%.8f"%i)
        with open(filename, 'a') as csvfile:
            csvwriter = csv.writer(csvfile)
            csvwriter.writerow(w_row)
        row = pd.DataFrame([mt],
                           columns  = cols,
                           index = [attr]
                          )
        fair_metrics = fair_metrics.append(row)
    fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)
    return fair_metrics

def get_fair_metrics_and_plot(fname, data, model, plot=False, model_aif=False):
    pred = model.predict(data).labels if model_aif else model.predict(data.features)
    fair = fair_metrics(fname, data, pred)
    if plot:
        pass

    return fair

def get_model_performance(X_test, y_true, y_pred, probs):
    accuracy = accuracy_score(y_true, y_pred)
    matrix = confusion_matrix(y_true, y_pred)
    f1 = f1_score(y_true, y_pred)
    return accuracy, matrix, f1

def plot_model_performance(model, X_test, y_true):
    y_pred = model.predict(X_test)
    probs = model.predict_proba(X_test)
    accuracy, matrix, f1 = get_model_performance(X_test, y_true, y_pred, probs)

Local file to load metric values

In [9]:
filename= 'C:/Users/krish/Downloads/main_pjt_final - Copy/filename_mainpjt_results_gender_may6_jp.csv'

Converting data to aif compatible format

Since we are dealing with binary label dataset we are using aif360 class BiaryLabelDataset here with target label as CreditStatus and protected attributes as age,gender,marital status. Refer part 11 for more details on protected attributes and privileged classes.

In [10]:
# Fairness metrics
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.explainers import MetricTextExplainer
from aif360.metrics import ClassificationMetric
# Get DF into IBM format
from aif360 import datasets
#converting to aif dataset
aif_dataset = datasets.BinaryLabelDataset(favorable_label = 1, unfavorable_label = 0, df=German_df,
                                                      label_names=["CreditStatus"],
                                                     protected_attribute_names=["Gender"],
                                              privileged_protected_attributes = [1])
In [11]:
#dataset_orig = GermanDataset(protected_attribute_names=['sex'],
#                            privileged_classes=[[1]],
#                            features_to_keep=['age', 'sex', 'employment', 'housing', 'savings', 'credit_amount', 'month', 'purpose'],
#                            custom_preprocessing=custom_preprocessing)

Splitting data to train and test sets

In [12]:
#privileged_groups = [{'Age':1},{' Gender': 1},{'Marital_Status':1}]
#unprivileged_groups = [{'Age':0},{'Gender': 0},{'Marital_Status':0}]
In [13]:
privileged_groups = [{'Gender': 1}]
unprivileged_groups = [{'Gender': 0}]
In [14]:
data_orig_train, data_orig_test = aif_dataset.split([0.8], shuffle=True)

X_train = data_orig_train.features
y_train = data_orig_train.labels.ravel()

X_test = data_orig_test.features
y_test = data_orig_test.labels.ravel()
In [15]:
X_train.shape
X_test.shape
Out[15]:
(800, 16)
Out[15]:
(200, 16)
In [16]:
data_orig_test.labels[:10].ravel()
Out[16]:
array([1., 0., 1., 0., 1., 0., 1., 1., 1., 1.])
In [17]:
data_orig_train.labels[:10].ravel()
Out[17]:
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 0.])

Testing bias with respect to protected variable

In [18]:
metric_orig_train = BinaryLabelDatasetMetric(data_orig_train, 
                                             unprivileged_groups=unprivileged_groups,
                                             privileged_groups=privileged_groups)
print("Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_train.mean_difference())
Difference in mean outcomes between unprivileged and privileged groups = -0.115809

A non zero value indicates bias.

Building ML model

Considering ensemble models for our study.

1. RANDOM FOREST CLASSIFIER MODEL

In [19]:
#Seting the Hyper Parameters
param_grid = {"max_depth": [3,5,7, 10,None],
              "n_estimators":[3,5,10,25,50,150],
              "max_features": [4,7,15,20]}
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
#Creating the classifier
rf_model = RandomForestClassifier(random_state=40)
grid_search = GridSearchCV(rf_model, param_grid=param_grid, cv=5, scoring='recall', verbose=0)
model_rf = grid_search
In [20]:
mdl_rf = model_rf.fit(data_orig_train.features, data_orig_train.labels.ravel())
In [21]:
from sklearn.metrics import confusion_matrix
conf_mat_rf = confusion_matrix(data_orig_test.labels.ravel(), model_rf.predict(data_orig_test.features))
conf_mat_rf
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), model_rf.predict(data_orig_test.features)))
Out[21]:
array([[  9,  54],
       [  3, 134]], dtype=int64)
0.715
In [22]:
unique, counts = np.unique(data_orig_test.labels.ravel(), return_counts=True)
dict(zip(unique, counts))
Out[22]:
{0.0: 63, 1.0: 137}

1.a. Feature importance of model

In [23]:
importances = model_rf.best_estimator_.feature_importances_
indices = np.argsort(importances)
features = data_orig_train.feature_names
#https://stackoverflow.com/questions/48377296/get-feature-importance-from-gridsearchcv
In [24]:
importances
Out[24]:
array([0.02426046, 0.03423814, 0.0179973 , 0.23962379, 0.30426023,
       0.06173495, 0.04371371, 0.00876601, 0.00778448, 0.1201465 ,
       0.00395691, 0.01594455, 0.00173155, 0.10182759, 0.00454334,
       0.00947048])
In [25]:
importances[indices]
Out[25]:
array([0.00173155, 0.00395691, 0.00454334, 0.00778448, 0.00876601,
       0.00947048, 0.01594455, 0.0179973 , 0.02426046, 0.03423814,
       0.04371371, 0.06173495, 0.10182759, 0.1201465 , 0.23962379,
       0.30426023])
In [26]:
features
Out[26]:
['Gender',
 'Age',
 'Marital_Status',
 'CurrentAcc_None',
 'CurrentAcc_LT200',
 'Savings_LT500',
 'CreditHistory_none/paid',
 'Debtors_co-applicant',
 'Job_unskilled-resident',
 'NumMonths',
 'Telephone',
 'Purpose_education',
 'Purpose_furniture/equip',
 'CreditAmount',
 'Foreignworker',
 'Debtors_guarantor']
In [27]:
plt.figure(figsize=(20,30))
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show()
Out[27]:
<Figure size 1440x2160 with 0 Axes>
Out[27]:
Text(0.5, 1.0, 'Feature Importances')
Out[27]:
<BarContainer object of 16 artists>
Out[27]:
([<matplotlib.axis.YTick at 0x1594d76d288>,
  <matplotlib.axis.YTick at 0x1594d783448>,
  <matplotlib.axis.YTick at 0x1594d75d448>,
  <matplotlib.axis.YTick at 0x1594e77a888>,
  <matplotlib.axis.YTick at 0x1594e77a088>,
  <matplotlib.axis.YTick at 0x1594e77a988>,
  <matplotlib.axis.YTick at 0x1594e799808>,
  <matplotlib.axis.YTick at 0x1594e77df08>,
  <matplotlib.axis.YTick at 0x1594e77d548>,
  <matplotlib.axis.YTick at 0x1594e792b88>,
  <matplotlib.axis.YTick at 0x1594e792788>,
  <matplotlib.axis.YTick at 0x1594e787cc8>,
  <matplotlib.axis.YTick at 0x1594e79e888>,
  <matplotlib.axis.YTick at 0x1594e79eb48>,
  <matplotlib.axis.YTick at 0x1594e792d88>,
  <matplotlib.axis.YTick at 0x1594e7993c8>],
 [Text(0, 0, 'Purpose_furniture/equip'),
  Text(0, 0, 'Telephone'),
  Text(0, 0, 'Foreignworker'),
  Text(0, 0, 'Job_unskilled-resident'),
  Text(0, 0, 'Debtors_co-applicant'),
  Text(0, 0, 'Debtors_guarantor'),
  Text(0, 0, 'Purpose_education'),
  Text(0, 0, 'Marital_Status'),
  Text(0, 0, 'Gender'),
  Text(0, 0, 'Age'),
  Text(0, 0, 'CreditHistory_none/paid'),
  Text(0, 0, 'Savings_LT500'),
  Text(0, 0, 'CreditAmount'),
  Text(0, 0, 'NumMonths'),
  Text(0, 0, 'CurrentAcc_None'),
  Text(0, 0, 'CurrentAcc_LT200')])
Out[27]:
Text(0.5, 0, 'Relative Importance')

1.b. Model Explainability/interpretability

1.b.1 Using SHAP (SHapley Additive exPlanations)

In [28]:
import shap

Test data interpretation

In [29]:
rf_explainer = shap.KernelExplainer(model_rf.predict, data_orig_test.features)
rf_shap_values = rf_explainer.shap_values(data_orig_test.features,nsamples=50)
#https://towardsdatascience.com/explain-any-models-with-the-shap-values-use-the-kernelexplainer-79de9464897a
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [30]:
rf_shap_values
Out[30]:
array([[ 0.        , -0.01322033,  0.        , ..., -0.04903036,
         0.        , -0.00162768],
       [ 0.02327716,  0.05950923,  0.        , ...,  0.0860185 ,
         0.        ,  0.        ],
       [ 0.0028804 ,  0.00565615,  0.00516581, ...,  0.00415378,
         0.        ,  0.01580578],
       ...,
       [ 0.00339202,  0.        ,  0.        , ...,  0.01092601,
         0.        ,  0.01088422],
       [ 0.00616199,  0.00386968,  0.00668135, ...,  0.01423018,
         0.        ,  0.        ],
       [ 0.        ,  0.07118312,  0.        , ..., -0.10428731,
         0.01920153, -0.02762703]])
In [31]:
rf_explainer.expected_value
Out[31]:
0.9399999999999998
In [32]:
y_test_predict=model_rf.predict(data_orig_test.features)
y_test_predict[:12]
data_orig_test.labels[:12].ravel()
data_orig_test.features[:2,:]
Out[32]:
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Out[32]:
array([1., 0., 1., 0., 1., 0., 1., 1., 1., 1., 1., 1.])
Out[32]:
array([[ 1.        ,  0.        ,  1.        ,  0.        ,  0.        ,
         1.        ,  1.        ,  0.        ,  0.        , 36.        ,
         0.        ,  0.        ,  0.        ,  0.30802245,  1.        ,
         0.        ],
       [ 1.        ,  1.        ,  0.        ,  0.        ,  1.        ,
         1.        ,  1.        ,  0.        ,  0.        , 36.        ,
         0.        ,  0.        ,  0.        ,  0.11290855,  1.        ,
         0.        ]])
In [33]:
y_test_predict.mean()
Out[33]:
0.94

The explainer expected value is the average model predicted value on input data. Shapely helps to understand how individual features impact the output of each individual instance. The shapely values are model predicted values which may not coincide with actual y test values due to prediction error.

link=”logit” argument converts the logit values to probability

In [34]:
shap.initjs()
shap.force_plot(rf_explainer.expected_value,rf_shap_values[0],data_orig_test.features[0],data_orig_test.feature_names,link='logit')
#https://github.com/slundberg/shap
#https://github.com/slundberg/shap/issues/279
#https://github.com/slundberg/shap/issues/977
shap.initjs()
shap.force_plot(rf_explainer.expected_value,rf_shap_values[0],data_orig_test.features[0],data_orig_test.feature_names)
Out[34]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
Out[34]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.

Features in blue pushes the base value towards lowest values and features in red moves base levels towards higher values.

Shapley values calculate the importance of a feature by comparing what a model predicts with and without the feature. However, since the order in which a model sees features can affect its predictions, this is done in every possible order, so that the features are fairly compared.

The SHAP plot shows features that contribute to pushing the output from the base value (average model output) to the actual predicted value.

In [35]:
shap.initjs()
shap.force_plot(rf_explainer.expected_value,rf_shap_values[1], data_orig_test.features[1],data_orig_test.feature_names,link='logit')
shap.initjs()
shap.force_plot(rf_explainer.expected_value,rf_shap_values[1], data_orig_test.features[1],data_orig_test.feature_names)
Out[35]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
Out[35]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [36]:
data_orig_test.feature_names
Out[36]:
['Gender',
 'Age',
 'Marital_Status',
 'CurrentAcc_None',
 'CurrentAcc_LT200',
 'Savings_LT500',
 'CreditHistory_none/paid',
 'Debtors_co-applicant',
 'Job_unskilled-resident',
 'NumMonths',
 'Telephone',
 'Purpose_education',
 'Purpose_furniture/equip',
 'CreditAmount',
 'Foreignworker',
 'Debtors_guarantor']
In [37]:
shap.force_plot(rf_explainer.expected_value,
                rf_shap_values, data_orig_test.features[:,:],feature_names = data_orig_test.feature_names)
Out[37]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [38]:
p = shap.summary_plot(rf_shap_values, data_orig_test.features, feature_names=data_orig_test.feature_names,plot_type="bar") 
display(p)
None

Variables with higher impact are displayed at the credit history, credit amount,num of months.

In [39]:
shap.decision_plot(rf_explainer.expected_value, rf_shap_values,feature_names=data_orig_test.feature_names)
  • The x-axis represents the model's output. In this case, the units are log odds.
  • The plot is centered on the x-axis at explainer.expected_value.
  • All SHAP values are relative to the model's expected value like a linear model's effects are relative to the intercept.
  • The y-axis lists the model's features.
  • By default, the features are ordered by descending importance. The importance is calculated over the observations plotted. This is usually different than the importance ordering for the entire dataset.
  • In addition to feature importance ordering, the decision plot also supports hierarchical cluster feature ordering and user-defined feature ordering.
  • Each observation's prediction is represented by a colored line. At the top of the plot, each line strikes the x-axis at its corresponding observation's predicted value. This value determines the color of the line on a spectrum.
  • Moving from the bottom of the plot to the top, SHAP values for each feature are added to the model's base value. This shows how each feature contributes to the overall prediction.
  • At the bottom of the plot, the observations converge at explainer.expected_value https://slundberg.github.io/shap/notebooks/plots/decision_plot.html

Like the force plot, the decision plot supports link='logit' to transform log odds to probabilities.

In [40]:
shap.decision_plot(rf_explainer.expected_value, rf_shap_values,feature_names=data_orig_test.feature_names,link='logit')
In [41]:
shap.plots._waterfall.waterfall_legacy(rf_explainer.expected_value, rf_shap_values[0],feature_names=data_orig_test.feature_names)

For first instace of input,out of all the displayed variables, CreditHistory is playing major role is pushing the target variable outcome towards predicting 1.

Interpretation of graph: https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html

f(x)- model output impacted by features; E(f(x))- expected output.

One the fundemental properties of Shapley values is that they always sum up to the difference between the game outcome when all players are present and the game outcome when no players are present. For machine learning models this means that SHAP values of all the input features will always sum up to the difference between baseline (expected) model output and the current model output for the prediction being explained.

Shapley values calculate the importance of a feature by comparing what a model predicts with and without the feature. However, since the order in which a model sees features can affect its predictions, this is done in every possible order, so that the features are fairly compared. https://medium.com/@gabrieltseng/interpreting-complex-models-with-shap-values-1c187db6ec83

In [42]:
shap.plots._waterfall.waterfall_legacy(rf_explainer.expected_value, rf_shap_values[1],feature_names=data_orig_test.feature_names)

For second instace of input,out of all the displayed variables, credit history is playing major role is pushing the target variable outcome towards predicting 1.

1.b.2 Using ELI5

In [43]:
#!pip install eli5
import eli5
In [44]:
from eli5.sklearn import PermutationImportance
In [45]:
perm_rf = PermutationImportance(mdl_rf).fit(data_orig_test.features, data_orig_test.labels.ravel())

Feature Importance

In [46]:
perm_imp_1=eli5.show_weights(perm_rf,feature_names = data_orig_test.feature_names)
perm_imp_1
plt.show()
Out[46]:
Weight Feature
0.0131 ± 0.0170 CreditHistory_none/paid
0.0029 ± 0.0072 Savings_LT500
0 ± 0.0000 Debtors_guarantor
0 ± 0.0000 Foreignworker
0 ± 0.0000 Purpose_furniture/equip
0 ± 0.0000 Purpose_education
0 ± 0.0000 Telephone
0 ± 0.0000 Debtors_co-applicant
-0.0015 ± 0.0194 NumMonths
-0.0015 ± 0.0109 CurrentAcc_LT200
-0.0029 ± 0.0072 Job_unskilled-resident
-0.0058 ± 0.0234 CreditAmount
-0.0073 ± 0.0092 Marital_Status
-0.0102 ± 0.0117 CurrentAcc_None
-0.0102 ± 0.0117 Age
-0.0102 ± 0.0117 Gender
  • eli5 provides a way to compute feature importances for any black-box estimator by measuring how score decreases when a feature is not available; the method is also known as “permutation importance” or “Mean Decrease Accuracy (MDA)”.
  • The first number in each row shows how much model performance decreased with a random shuffling (in this case, using "accuracy" as the performance metric).

  • Like most things in data science, there is some randomness to the exact performance change from a shuffling a column. We measure the amount of randomness in our permutation importance calculation by repeating the process with multiple shuffles. The number after the ± measures how performance varied from one-reshuffling to the next.

  • You'll occasionally see negative values for permutation importances. In those cases, the predictions on the shuffled (or noisy) data happened to be more accurate than the real data. This happens when the feature didn't matter (should have had an importance close to 0), but random chance caused the predictions on shuffled data to be more accurate. This is more common with small datasets, like the one in this example, because there is more room for luck/chance.

https://www.kaggle.com/dansbecker/permutation-importance

1.c. Measuring fairness

Of Baseline model

In [47]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(mdl_rf, X_test, y_test)
In [48]:
fair_rf = get_fair_metrics_and_plot(filename, data_orig_test, mdl_rf)
fair_rf
Computing fairness of the model.
Out[48]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.00 0.000000
Gender 0.715 0.824615 0.953104 -0.044647 -0.038364 -0.074567 -0.085997 0.94 0.071111
In [49]:
type(data_orig_train)
Out[49]:
aif360.datasets.binary_label_dataset.BinaryLabelDataset

PRE PROCESSING

In [50]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_rf = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_rf_rw = RW_rf.fit_transform(data_orig_train)
#train and save model
rf_transf_rw = model_rf.fit(data_transf_train_rf_rw.features,
                     data_transf_train_rf_rw.labels.ravel())

data_transf_test_rf_rw = RW_rf.transform(data_orig_test)
fair_rf_rw = get_fair_metrics_and_plot(filename, data_transf_test_rf_rw, rf_transf_rw, plot=False)
Computing fairness of the model.
In [51]:
metric_transf_train = BinaryLabelDatasetMetric(data_transf_train_rf_rw, 
                                               unprivileged_groups=unprivileged_groups,
                                               privileged_groups=privileged_groups)
print("Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_train.mean_difference())
Difference in mean outcomes between unprivileged and privileged groups = -0.000000
In [52]:
fair_rf_rw
Out[52]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.0000 1.000000 0.000000 0.000000 0.000000 0.000000 1.00 0.000000
Gender 0.700204 0.8138 0.969109 -0.029266 -0.038364 -0.074567 -0.165529 0.94 0.071111
In [53]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR_rf = DisparateImpactRemover()
data_transf_train_rf_dir = DIR_rf.fit_transform(data_orig_train)

# Train and save the model
rf_transf_dir = model_rf.fit(data_transf_train_rf_dir.features,data_transf_train_rf_dir.labels.ravel())
In [54]:
fair_dir_rf_dir = get_fair_metrics_and_plot(filename,data_orig_test, rf_transf_dir, plot=False)
fair_dir_rf_dir
Computing fairness of the model.
Out[54]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.00000 0.00000 0.000000 1.000 0.000000
Gender 0.735 0.834891 1.036759 0.033486 0.03125 0.00024 -0.109335 0.932 0.069555

INPROCESSING

In [55]:
#!pip install --user --upgrade tensorflow==1.15.0
#2.2.0
#!pip uninstall tensorflow
In [56]:
#!pip install "tensorflow==1.15"
#!pip install --upgrade tensorflow-hub
In [57]:
#%tensorflow_version 1.15
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [58]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [59]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [60]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope1',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_rf_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
#train and save the model
        debiased_model_rf_ad.fit(data_orig_train)
        fair_rf_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_rf_ad, plot=False, model_aif=True)
WARNING:tensorflow:From C:\Users\krish\Anaconda3\lib\site-packages\aif360\algorithms\inprocessing\adversarial_debiasing.py:89: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.
Instructions for updating:
Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
WARNING:tensorflow:From C:\Users\krish\Anaconda3\lib\site-packages\tensorflow_core\python\ops\nn_impl.py:183: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
epoch 0; iter: 0; batch classifier loss: 1.129487; batch adversarial loss: 0.629888
epoch 1; iter: 0; batch classifier loss: 0.910242; batch adversarial loss: 0.672216
epoch 2; iter: 0; batch classifier loss: 0.798675; batch adversarial loss: 0.627053
epoch 3; iter: 0; batch classifier loss: 0.756565; batch adversarial loss: 0.676061
epoch 4; iter: 0; batch classifier loss: 0.971732; batch adversarial loss: 0.673836
epoch 5; iter: 0; batch classifier loss: 0.687797; batch adversarial loss: 0.644488
epoch 6; iter: 0; batch classifier loss: 0.770778; batch adversarial loss: 0.618994
epoch 7; iter: 0; batch classifier loss: 0.673490; batch adversarial loss: 0.657255
epoch 8; iter: 0; batch classifier loss: 0.784610; batch adversarial loss: 0.633255
epoch 9; iter: 0; batch classifier loss: 0.574443; batch adversarial loss: 0.665192
Out[60]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x1594ef1dd08>
Computing fairness of the model.
In [61]:
fair_rf_ad
Out[61]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.0 0.0 0.0 0.0 0.000000 1 0.000000
Gender 0.685 0.813056 1.0 0.0 0.0 0.0 -0.101725 [1.0] 0.058241
In [62]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_pr_rf = PrejudiceRemover()

# Train and save the model
debiased_model_pr_rf.fit(data_orig_train)

fair_rf_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_pr_rf, plot=False, model_aif=True)
fair_rf_pr
Out[62]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x1594ef98908>
Computing fairness of the model.
Out[62]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.715 0.813115 0.817399 -0.161339 -0.143039 -0.22075 -0.035261 [0.8730000000000001] 0.119956
#
In [63]:
y_pred = debiased_model_pr_rf.predict(data_orig_test)


data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [64]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = mdl_rf.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = mdl_rf.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [65]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_rf = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_rf = EOPP_rf.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf_eopp = EOPP_rf.predict(data_orig_test_pred)
fair_rf_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf_eopp, pred_is_dataset=True)
Computing fairness of the model.
In [66]:
fair_rf_eo
Out[66]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.00000 1.00000 0.000000 0.00000 0.000000 0.000000 1 0.000000
Gender 0.73 0.83125 1.01643 0.014967 0.00686 -0.011955 -0.090817 [0.924] 0.074753
In [67]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_rf = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=42)

CPP_rf = CPP_rf.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf_cpp = CPP_rf.predict(data_orig_test_pred)
fair_rf_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf_cpp, pred_is_dataset=True)
Computing fairness of the model.
In [68]:
fair_rf_ceo
Out[68]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.00000 0.00000 0.000000 1 0.000000
Gender 0.725 0.829721 1.021399 0.019787 0.03125 -0.01976 -0.123034 [0.9319999999999999] 0.070372
In [69]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_rf = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_rf = ROC_rf.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf_roc = ROC_rf.predict(data_orig_test_pred)
fair_rf_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [70]:
fair_rf_roc
Out[70]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.0000 0.000000 0.000000 0.000000 1 0.00000
Gender 0.755 0.817844 1.053021 0.0345 -0.032012 0.006302 0.019533 [0.8210000000000001] 0.18172

2. XGBoost Classifier

In [71]:
from xgboost import XGBClassifier
estimator = XGBClassifier(seed=40)

parameters = {
    'max_depth': range (2, 10, 2),
    'n_estimators': range(60, 240, 40),
    'learning_rate': [0.1, 0.01, 0.05]
}
grid_search = GridSearchCV(
    estimator=estimator,
    param_grid=parameters,
    scoring = 'recall',
    
    cv = 5,
    verbose=0
)

model_xg=grid_search
In [72]:
mdl_xgb = model_xg.fit(data_orig_train.features, data_orig_train.labels.ravel())
In [73]:
conf_mat_xg = confusion_matrix(data_orig_test.labels.ravel(), model_xg.predict(data_orig_test.features))
conf_mat_xg
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), model_xg.predict(data_orig_test.features)))
Out[73]:
array([[ 15,  48],
       [  2, 135]], dtype=int64)
0.75

2.a. Feature importance of model

In [74]:
importances_xg = model_xg.best_estimator_.feature_importances_
indices_xg = np.argsort(importances_xg)
features = data_orig_train.feature_names
#https://stackoverflow.com/questions/48377296/get-feature-importance-from-gridsearchcv
In [75]:
importances_xg
Out[75]:
array([0.0681265 , 0.04106884, 0.        , 0.20567718, 0.3482808 ,
       0.08223109, 0.07956417, 0.        , 0.        , 0.07214887,
       0.        , 0.0159479 , 0.        , 0.0294841 , 0.        ,
       0.0574706 ], dtype=float32)
In [76]:
importances_xg[indices_xg]
Out[76]:
array([0.        , 0.        , 0.        , 0.        , 0.        ,
       0.        , 0.0159479 , 0.0294841 , 0.04106884, 0.0574706 ,
       0.0681265 , 0.07214887, 0.07956417, 0.08223109, 0.20567718,
       0.3482808 ], dtype=float32)
In [77]:
features
Out[77]:
['Gender',
 'Age',
 'Marital_Status',
 'CurrentAcc_None',
 'CurrentAcc_LT200',
 'Savings_LT500',
 'CreditHistory_none/paid',
 'Debtors_co-applicant',
 'Job_unskilled-resident',
 'NumMonths',
 'Telephone',
 'Purpose_education',
 'Purpose_furniture/equip',
 'CreditAmount',
 'Foreignworker',
 'Debtors_guarantor']
In [78]:
plt.figure(figsize=(20,30))
plt.title('Feature Importances')
plt.barh(range(len(indices_xg)), importances_xg[indices_xg], color='b', align='center')
plt.yticks(range(len(indices_xg)), [features[i] for i in indices_xg])
plt.xlabel('Relative Importance')
plt.show()
Out[78]:
<Figure size 1440x2160 with 0 Axes>
Out[78]:
Text(0.5, 1.0, 'Feature Importances')
Out[78]:
<BarContainer object of 16 artists>
Out[78]:
([<matplotlib.axis.YTick at 0x15953509088>,
  <matplotlib.axis.YTick at 0x159534f1608>,
  <matplotlib.axis.YTick at 0x15953516548>,
  <matplotlib.axis.YTick at 0x1595358c848>,
  <matplotlib.axis.YTick at 0x15953591148>,
  <matplotlib.axis.YTick at 0x15953591848>,
  <matplotlib.axis.YTick at 0x15953594148>,
  <matplotlib.axis.YTick at 0x15953594848>,
  <matplotlib.axis.YTick at 0x15953591f88>,
  <matplotlib.axis.YTick at 0x15953599188>,
  <matplotlib.axis.YTick at 0x15953599848>,
  <matplotlib.axis.YTick at 0x1595359e248>,
  <matplotlib.axis.YTick at 0x1595359eb88>,
  <matplotlib.axis.YTick at 0x159535a1588>,
  <matplotlib.axis.YTick at 0x159535a1b08>,
  <matplotlib.axis.YTick at 0x159535a4948>],
 [Text(0, 0, 'Marital_Status'),
  Text(0, 0, 'Debtors_co-applicant'),
  Text(0, 0, 'Job_unskilled-resident'),
  Text(0, 0, 'Telephone'),
  Text(0, 0, 'Purpose_furniture/equip'),
  Text(0, 0, 'Foreignworker'),
  Text(0, 0, 'Purpose_education'),
  Text(0, 0, 'CreditAmount'),
  Text(0, 0, 'Age'),
  Text(0, 0, 'Debtors_guarantor'),
  Text(0, 0, 'Gender'),
  Text(0, 0, 'NumMonths'),
  Text(0, 0, 'CreditHistory_none/paid'),
  Text(0, 0, 'Savings_LT500'),
  Text(0, 0, 'CurrentAcc_None'),
  Text(0, 0, 'CurrentAcc_LT200')])
Out[78]:
Text(0.5, 0, 'Relative Importance')

2.b. Model Explainability/interpretability

2.b.1 Using SHAP (SHapley Additive exPlanations)

In [79]:
import shap
xg_shap_values_t1 = shap.KernelExplainer(mdl_xgb.predict,data_orig_train.features)
Using 800 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.

Test data interpretation

In [80]:
xgb_explainer = shap.KernelExplainer(mdl_xgb.predict, data_orig_test.features)
xgb_shap_values = xgb_explainer.shap_values(data_orig_test.features,nsamples=10)
#https://towardsdatascience.com/explain-any-models-with-the-shap-values-use-the-kernelexplainer-79de9464897a
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [81]:
xgb_shap_values
Out[81]:
array([[ 0.     ,  0.     , -0.2225 , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     ,  0.     ,  0.     , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     ,  0.0225 ,  0.     , ...,  0.     ,  0.     ,  0.     ],
       ...,
       [ 0.     ,  0.     ,  0.     , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     ,  0.     ,  0.     , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     , -0.09625,  0.     , ...,  0.     ,  0.     ,  0.     ]])
In [82]:
shap.initjs()
shap.force_plot(xgb_explainer.expected_value,xgb_shap_values[0,:], data_orig_test.features[0],data_orig_test.feature_names,link='logit')
#https://github.com/slundberg/shap
#https://github.com/slundberg/shap/issues/279
Out[82]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [83]:
shap.initjs()
shap.force_plot(xgb_explainer.expected_value,xgb_shap_values[1,:], data_orig_test.features[1],data_orig_test.feature_names,link='logit')
Out[83]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [84]:
shap.force_plot(xgb_explainer.expected_value,
                xgb_shap_values, data_orig_test.features[:,:],feature_names = data_orig_test.feature_names)
Out[84]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [85]:
p = shap.summary_plot(xgb_shap_values, data_orig_test.features, feature_names=data_orig_test.feature_names,plot_type="bar") 
display(p)
None

The variables with higher impact are the ones in the top.

In [86]:
shap.plots._waterfall.waterfall_legacy(xgb_explainer.expected_value, xgb_shap_values[0,:],feature_names=data_orig_test.feature_names)

Here credit history none/paid is moving target outcome towards right i.e., 1.

Interpretation of graph: https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html

f(x)- model output impacted by features; E(f(x))- expected output.

One the fundemental properties of Shapley values is that they always sum up to the difference between the game outcome when all players are present and the game outcome when no players are present. For machine learning models this means that SHAP values of all the input features will always sum up to the difference between baseline (expected) model output and the current model output for the prediction being explained.

In [87]:
shap.plots._waterfall.waterfall_legacy(xgb_explainer.expected_value, xgb_shap_values[1],feature_names=data_orig_test.feature_names)

Here Credit History and Age are moving the target result towards right.

2.b.2 Using ELI5

In [88]:
#!pip install eli5
import eli5
from eli5.sklearn import PermutationImportance
In [89]:
perm_xgb = PermutationImportance(mdl_xgb).fit(data_orig_test.features, data_orig_test.labels.ravel())

Feature Importance

In [90]:
perm_imp_2=eli5.show_weights(perm_xgb,feature_names = data_orig_test.feature_names)
perm_imp_2
plt.show()
Out[90]:
Weight Feature
0.0380 ± 0.0251 NumMonths
0.0131 ± 0.0109 CreditHistory_none/paid
0.0102 ± 0.0072 Savings_LT500
0.0058 ± 0.0058 CurrentAcc_LT200
0 ± 0.0000 Foreignworker
0 ± 0.0000 CreditAmount
0 ± 0.0000 Purpose_furniture/equip
0 ± 0.0000 Purpose_education
0 ± 0.0000 Telephone
0 ± 0.0000 Job_unskilled-resident
0 ± 0.0000 Debtors_co-applicant
0 ± 0.0000 Marital_Status
0 ± 0.0000 Age
-0.0015 ± 0.0058 Debtors_guarantor
-0.0015 ± 0.0143 Gender
-0.0058 ± 0.0143 CurrentAcc_None

2.c. Measuring fairness

Of Baseline model

In [91]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(mdl_xgb, X_test, y_test)
In [92]:
fair_xg = get_fair_metrics_and_plot(filename, data_orig_test, model_xg)
fair_xg
Computing fairness of the model.
Out[92]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.00000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.75 0.84375 0.961317 -0.035769 -0.013974 -0.099294 -0.114155 0.931 0.063482

PRE PROCESSING

In [93]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_xg = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_xg_rw = RW_xg.fit_transform(data_orig_train)

#train and save model
xg_transf_rw = model_xg.fit(data_transf_train_xg_rw.features,
                     data_transf_train_xg_rw.labels.ravel())

data_transf_test_xg_rw = RW_xg.transform(data_orig_test)
fair_xg_rw = get_fair_metrics_and_plot(filename, data_transf_test_xg_rw, xg_transf_rw, plot=False)
Computing fairness of the model.
In [94]:
fair_xg_rw
Out[94]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.735743 0.833333 0.992918 -0.006491 -0.013974 -0.099294 -0.182658 0.931 0.063482
In [95]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR_xg = DisparateImpactRemover()
data_transf_train_xg_dir = DIR_xg.fit_transform(data_orig_train)

# Train and save the model
xg_transf_dir = model_xg.fit(data_transf_train_xg_dir.features,data_transf_train_xg_dir.labels.ravel())
In [96]:
fair_dir_xg_dir = get_fair_metrics_and_plot(filename,data_orig_test, xg_transf_dir, plot=False)
fair_dir_xg_dir
Computing fairness of the model.
Out[96]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.745 0.841121 0.954248 -0.042618 -0.013974 -0.109294 -0.121005 0.936 0.063943

INPROCESSING

In [97]:
#!pip install --user --upgrade tensorflow==1.15.0
#2.2.0
#!pip uninstall tensorflow
In [98]:
#!pip install "tensorflow==1.15"
#!pip install --upgrade tensorflow-hub
In [99]:
#%tensorflow_version 1.15
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [100]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [101]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [102]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope1',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_xg_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
#train and save the model
        debiased_model_xg_ad.fit(data_orig_train)
        fair_xg_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_xg_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.954649; batch adversarial loss: 0.638520
epoch 1; iter: 0; batch classifier loss: 0.785396; batch adversarial loss: 0.622496
epoch 2; iter: 0; batch classifier loss: 0.860919; batch adversarial loss: 0.744553
epoch 3; iter: 0; batch classifier loss: 0.685736; batch adversarial loss: 0.572103
epoch 4; iter: 0; batch classifier loss: 0.648918; batch adversarial loss: 0.603511
epoch 5; iter: 0; batch classifier loss: 0.728703; batch adversarial loss: 0.656826
epoch 6; iter: 0; batch classifier loss: 0.710048; batch adversarial loss: 0.627021
epoch 7; iter: 0; batch classifier loss: 0.628043; batch adversarial loss: 0.625900
epoch 8; iter: 0; batch classifier loss: 0.821251; batch adversarial loss: 0.598613
epoch 9; iter: 0; batch classifier loss: 0.638662; batch adversarial loss: 0.619882
Out[102]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x15953f90708>
Computing fairness of the model.
In [103]:
fair_xg_ad
Out[103]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.00000
Gender 0.685 0.809668 0.938786 -0.060375 -0.038364 -0.086105 -0.101725 [0.965] 0.07289
In [104]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_pr_xg = PrejudiceRemover()

# Train and save the model
debiased_model_pr_xg.fit(data_orig_train)

fair_xg_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_pr_xg, plot=False, model_aif=True)
fair_xg_pr
Out[104]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x1595413a588>
Computing fairness of the model.
Out[104]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.715 0.813115 0.817399 -0.161339 -0.143039 -0.22075 -0.035261 [0.8730000000000001] 0.119956
#
In [105]:
y_pred = debiased_model_pr_xg.predict(data_orig_test)


data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [106]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = mdl_xgb.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = mdl_xgb.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [107]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_xg = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_xg = EOPP_xg.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg_eopp = EOPP_xg.predict(data_orig_test_pred)
fair_xg_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg_eopp, pred_is_dataset=True)
Computing fairness of the model.
In [108]:
fair_xg_eo
Out[108]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.73 0.833333 1.013889 0.012938 -0.013974 0.00609 -0.065449 [0.9359999999999999] 0.065206
In [109]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_xg = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=42)

CPP_xg = CPP_xg.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg_cpp = CPP_xg.predict(data_orig_test_pred)
fair_xg_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg_cpp, pred_is_dataset=True)
Computing fairness of the model.
In [110]:
fair_xg_ceo
Out[110]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.00000 0.000000 0.000000 1 0.000000
Gender 0.725 0.831804 0.913928 -0.083714 -0.02439 -0.164503 -0.148402 [0.949] 0.060817
In [111]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_xg = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_xg = ROC_xg.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg_roc = ROC_xg.predict(data_orig_test_pred)
fair_xg_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [112]:
fair_xg_roc
Out[112]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.00000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.755 0.815094 1.057971 0.03653 -0.045986 0.019315 0.044901 [0.8380000000000001] 0.191206
In [ ]:
 
In [ ]:
 

3. XGBOOST with out hyper-parameter tuning

In [113]:
from xgboost import XGBClassifier
model_xgb2 = XGBClassifier(seed=40)
In [114]:
mdl_xgb2 = model_xgb2.fit(data_orig_train.features, data_orig_train.labels.ravel())
In [115]:
conf_mat_xg2 = confusion_matrix(data_orig_test.labels.ravel(), model_xgb2.predict(data_orig_test.features))
conf_mat_xg2
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), model_xgb2.predict(data_orig_test.features)))
Out[115]:
array([[ 27,  36],
       [ 29, 108]], dtype=int64)
0.675

3.a. Feature importance of model

In [116]:
importances_xg2 = model_xgb2.feature_importances_
indices_xg2 = np.argsort(importances_xg2)
features2 = data_orig_train.feature_names
#https://stackoverflow.com/questions/48377296/get-feature-importance-from-gridsearchcv
In [117]:
importances_xg2
Out[117]:
array([0.0489691 , 0.04436447, 0.02988521, 0.02598947, 0.32962602,
       0.05633068, 0.04220858, 0.03167811, 0.03903325, 0.0417227 ,
       0.03801652, 0.0680264 , 0.03471561, 0.03401502, 0.02714077,
       0.10827809], dtype=float32)
In [118]:
importances_xg2[indices_xg2]
Out[118]:
array([0.02598947, 0.02714077, 0.02988521, 0.03167811, 0.03401502,
       0.03471561, 0.03801652, 0.03903325, 0.0417227 , 0.04220858,
       0.04436447, 0.0489691 , 0.05633068, 0.0680264 , 0.10827809,
       0.32962602], dtype=float32)
In [119]:
features2
Out[119]:
['Gender',
 'Age',
 'Marital_Status',
 'CurrentAcc_None',
 'CurrentAcc_LT200',
 'Savings_LT500',
 'CreditHistory_none/paid',
 'Debtors_co-applicant',
 'Job_unskilled-resident',
 'NumMonths',
 'Telephone',
 'Purpose_education',
 'Purpose_furniture/equip',
 'CreditAmount',
 'Foreignworker',
 'Debtors_guarantor']
In [120]:
plt.figure(figsize=(20,30))
plt.title('Feature Importances')
plt.barh(range(len(indices_xg2)), importances_xg2[indices_xg2], color='b', align='center')
plt.yticks(range(len(indices_xg2)), [features2[i] for i in indices_xg2])
plt.xlabel('Relative Importance')
plt.show()
Out[120]:
<Figure size 1440x2160 with 0 Axes>
Out[120]:
Text(0.5, 1.0, 'Feature Importances')
Out[120]:
<BarContainer object of 16 artists>
Out[120]:
([<matplotlib.axis.YTick at 0x15953e57208>,
  <matplotlib.axis.YTick at 0x15953e53988>,
  <matplotlib.axis.YTick at 0x15953e52748>,
  <matplotlib.axis.YTick at 0x15953eaa548>,
  <matplotlib.axis.YTick at 0x15953eaac88>,
  <matplotlib.axis.YTick at 0x15953eae488>,
  <matplotlib.axis.YTick at 0x15953eaec88>,
  <matplotlib.axis.YTick at 0x15953eb3488>,
  <matplotlib.axis.YTick at 0x15953eb3c48>,
  <matplotlib.axis.YTick at 0x15953eb85c8>,
  <matplotlib.axis.YTick at 0x15953eb8b08>,
  <matplotlib.axis.YTick at 0x15953ebb948>,
  <matplotlib.axis.YTick at 0x15953eb3b88>,
  <matplotlib.axis.YTick at 0x15953ebbec8>,
  <matplotlib.axis.YTick at 0x15953ec1388>,
  <matplotlib.axis.YTick at 0x15953ec1bc8>],
 [Text(0, 0, 'CurrentAcc_None'),
  Text(0, 0, 'Foreignworker'),
  Text(0, 0, 'Marital_Status'),
  Text(0, 0, 'Debtors_co-applicant'),
  Text(0, 0, 'CreditAmount'),
  Text(0, 0, 'Purpose_furniture/equip'),
  Text(0, 0, 'Telephone'),
  Text(0, 0, 'Job_unskilled-resident'),
  Text(0, 0, 'NumMonths'),
  Text(0, 0, 'CreditHistory_none/paid'),
  Text(0, 0, 'Age'),
  Text(0, 0, 'Gender'),
  Text(0, 0, 'Savings_LT500'),
  Text(0, 0, 'Purpose_education'),
  Text(0, 0, 'Debtors_guarantor'),
  Text(0, 0, 'CurrentAcc_LT200')])
Out[120]:
Text(0.5, 0, 'Relative Importance')

3.b. Model Explainability/interpretability

3.b.1 Using SHAP (SHapley Additive exPlanations)

In [121]:
import shap
xg_shap_values_t = shap.KernelExplainer(mdl_xgb2.predict,data_orig_train.features)
Using 800 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.

Test data interpretation

In [122]:
xgb_explainer2 = shap.KernelExplainer(mdl_xgb2.predict, data_orig_test.features)
xgb_shap_values2 = xgb_explainer2.shap_values(data_orig_test.features,nsamples=10)
#https://towardsdatascience.com/explain-any-models-with-the-shap-values-use-the-kernelexplainer-79de9464897a
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [123]:
xgb_shap_values2
Out[123]:
array([[ 0.    ,  0.    ,  0.    , ...,  0.28  ,  0.    ,  0.    ],
       [ 0.    ,  0.    , -0.285 , ..., -0.175 ,  0.    ,  0.    ],
       [ 0.    ,  0.2025,  0.    , ...,  0.    ,  0.    ,  0.    ],
       ...,
       [ 0.    ,  0.    ,  0.08  , ...,  0.    ,  0.    ,  0.    ],
       [ 0.    ,  0.    ,  0.    , ...,  0.    ,  0.    ,  0.    ],
       [ 0.    , -0.35  ,  0.    , ..., -0.29  ,  0.    ,  0.    ]])
In [124]:
shap.initjs()
shap.force_plot(xgb_explainer2.expected_value,xgb_shap_values2[0,:],  data_orig_test.features[0],data_orig_test.feature_names,link='logit')
#https://github.com/slundberg/shap
#https://github.com/slundberg/shap/issues/279
Out[124]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [125]:
shap.initjs()
shap.force_plot(xgb_explainer2.expected_value,xgb_shap_values2[1,:],  data_orig_test.features[1],data_orig_test.feature_names,link='logit')
Out[125]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [126]:
data_orig_test.feature_names
Out[126]:
['Gender',
 'Age',
 'Marital_Status',
 'CurrentAcc_None',
 'CurrentAcc_LT200',
 'Savings_LT500',
 'CreditHistory_none/paid',
 'Debtors_co-applicant',
 'Job_unskilled-resident',
 'NumMonths',
 'Telephone',
 'Purpose_education',
 'Purpose_furniture/equip',
 'CreditAmount',
 'Foreignworker',
 'Debtors_guarantor']
In [127]:
shap.force_plot(xgb_explainer2.expected_value,
                xgb_shap_values2, data_orig_test.features[:,:],feature_names = data_orig_test.feature_names)
Out[127]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [128]:
p = shap.summary_plot(xgb_shap_values2, data_orig_test.features, feature_names=data_orig_test.feature_names,plot_type="bar") 
display(p)
None

The variables with higher impact are at the top.

In [129]:
shap.plots._waterfall.waterfall_legacy(xgb_explainer2.expected_value, xgb_shap_values2[0,:],feature_names=data_orig_test.feature_names)

Interpretation of graph: https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html

  • f(x)- model output impacted by features; E(f(x))- expected output.

  • One the fundemental properties of Shapley values is that they always sum up to the difference between the game outcome when all players are present and the game outcome when no players are present. For machine learning models this means that SHAP values of all the input features will always sum up to the difference between baseline (expected) model output and the current model output for the prediction being explained.

In [130]:
shap.plots._waterfall.waterfall_legacy(xgb_explainer2.expected_value, xgb_shap_values2[1],feature_names=data_orig_test.feature_names)

3.b.2 Using ELI5

In [131]:
#!pip install eli5
import eli5
from eli5.sklearn import PermutationImportance
In [132]:
perm_xgb2 = PermutationImportance(mdl_xgb2).fit(data_orig_test.features, data_orig_test.labels.ravel())

Feature Importance

In [133]:
perm_imp_3=eli5.show_weights(perm_xgb2,feature_names = data_orig_test.feature_names)
perm_imp_3
plt.show()
Out[133]:
Weight Feature
0.0190 ± 0.0453 NumMonths
0.0070 ± 0.0265 CurrentAcc_LT200
0.0060 ± 0.0075 Debtors_guarantor
-0.0020 ± 0.0432 CreditAmount
-0.0020 ± 0.0049 Foreignworker
-0.0020 ± 0.0049 Debtors_co-applicant
-0.0030 ± 0.0185 Telephone
-0.0040 ± 0.0183 Job_unskilled-resident
-0.0050 ± 0.0110 Purpose_education
-0.0060 ± 0.0183 Savings_LT500
-0.0060 ± 0.0133 Purpose_furniture/equip
-0.0070 ± 0.0224 CurrentAcc_None
-0.0070 ± 0.0102 Marital_Status
-0.0130 ± 0.0162 Age
-0.0160 ± 0.0160 CreditHistory_none/paid
-0.0200 ± 0.0379 Gender

Explaining individual predictions

In [134]:
from eli5 import show_prediction
show_prediction(mdl_xgb2, data_orig_test.features[1], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[134]:

y=0.0 (probability 0.571, score -0.288) top features

Contribution? Feature Value
+0.716 CurrentAcc_LT200 1.000
+0.656 NumMonths 36.000
+0.415 Savings_LT500 1.000
+0.298 Marital_Status 0.000
+0.132 CurrentAcc_None 0.000
+0.121 CreditHistory_none/paid 1.000
+0.074 Telephone 0.000
+0.073 Debtors_guarantor 0.000
+0.013 Foreignworker 1.000
-0.018 Debtors_co-applicant 0.000
-0.019 Job_unskilled-resident 0.000
-0.040 Purpose_education 0.000
-0.070 Purpose_furniture/equip 0.000
-0.136 Gender 1.000
-0.238 Age 1.000
-0.648 CreditAmount 0.113
-1.042 <BIAS> 1.000
In [ ]:
 

3.c. Measuring fairness

Of Baseline model

In [135]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(mdl_xgb2, X_test, y_test)
In [136]:
fair_xg2 = get_fair_metrics_and_plot(filename, data_orig_test, mdl_xgb2)
fair_xg2
Computing fairness of the model.
Out[136]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.675 0.768683 0.741724 -0.199899 -0.254827 -0.196644 0.112887 0.769 0.206693

PRE PROCESSING

In [137]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_xg2 = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_xg2_rw = RW_xg2.fit_transform(data_orig_train)

#train and save model
xg2_transf_rw = model_xgb2.fit(data_transf_train_xg2_rw.features,
                     data_transf_train_xg2_rw.labels.ravel())

data_transf_test_xg2_rw = RW_xg2.transform(data_orig_test)
fair_xg2_rw = get_fair_metrics_and_plot(filename, data_transf_test_xg2_rw, xg2_transf_rw, plot=False)
Computing fairness of the model.
In [138]:
fair_xg2_rw
Out[138]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.661128 0.755126 0.764151 -0.179871 -0.254827 -0.196644 0.088825 0.769 0.206693
In [139]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR_xg2 = DisparateImpactRemover()
data_transf_train_xg2_dir = DIR_xg2.fit_transform(data_orig_train)

# Train and save the model
xg2_transf_dir = model_xgb2.fit(data_transf_train_xg2_dir.features,data_transf_train_xg2_dir.labels.ravel())
In [140]:
fair_dir_xg2_dir = get_fair_metrics_and_plot(filename,data_orig_test, xg2_transf_dir, plot=False)
fair_dir_xg2_dir
Computing fairness of the model.
Out[140]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.69 0.789116 0.747365 -0.212836 -0.268547 -0.205043 0.108067 0.773 0.163045

INPROCESSING

In [141]:
#!pip install --user --upgrade tensorflow==1.15.0
#2.2.0
#!pip uninstall tensorflow
In [142]:
#!pip install "tensorflow==1.15"
#!pip install --upgrade tensorflow-hub
In [143]:
#%tensorflow_version 1.15
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [144]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [145]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [146]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope1',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_xg2_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
#train and save the model
        debiased_model_xg2_ad.fit(data_orig_train)
        fair_xg2_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_xg2_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.954649; batch adversarial loss: 0.638520
epoch 1; iter: 0; batch classifier loss: 0.785396; batch adversarial loss: 0.622496
epoch 2; iter: 0; batch classifier loss: 0.860919; batch adversarial loss: 0.744553
epoch 3; iter: 0; batch classifier loss: 0.685736; batch adversarial loss: 0.572103
epoch 4; iter: 0; batch classifier loss: 0.648918; batch adversarial loss: 0.603511
epoch 5; iter: 0; batch classifier loss: 0.728703; batch adversarial loss: 0.656826
epoch 6; iter: 0; batch classifier loss: 0.710048; batch adversarial loss: 0.627021
epoch 7; iter: 0; batch classifier loss: 0.628043; batch adversarial loss: 0.625900
epoch 8; iter: 0; batch classifier loss: 0.821251; batch adversarial loss: 0.598613
epoch 9; iter: 0; batch classifier loss: 0.638662; batch adversarial loss: 0.619882
Out[146]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x159583e2bc8>
Computing fairness of the model.
In [147]:
fair_xg2_ad
Out[147]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.00000
Gender 0.685 0.809668 0.938786 -0.060375 -0.038364 -0.086105 -0.101725 [0.965] 0.07289
In [148]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_pr_xg2 = PrejudiceRemover()

# Train and save the model
debiased_model_pr_xg2.fit(data_orig_train)

fair_xg2_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_pr_xg2, plot=False, model_aif=True)
fair_xg2_pr
Out[148]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x15958f89c08>
Computing fairness of the model.
Out[148]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.715 0.813115 0.817399 -0.161339 -0.143039 -0.22075 -0.035261 [0.8730000000000001] 0.119956
#
In [149]:
y_pred = debiased_model_pr_xg2.predict(data_orig_test)


data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [150]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = mdl_xgb2.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = mdl_xgb2.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [151]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_xg2 = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_xg2 = EOPP_xg2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg2_eopp = EOPP_xg2.predict(data_orig_test_pred)
fair_xg2_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg2_eopp, pred_is_dataset=True)
Computing fairness of the model.
In [152]:
fair_xg2_eo
Out[152]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.565 0.666667 1.021399 0.013191 0.033537 -0.014001 -0.063166 [0.6419999999999998] 0.341502
In [153]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_xg2 = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=42)

CPP_xg2 = CPP_xg2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg2_cpp = CPP_xg2.predict(data_orig_test_pred)
fair_xg2_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg2_cpp, pred_is_dataset=True)
Computing fairness of the model.
In [154]:
fair_xg2_ceo
Out[154]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.00000 0.00000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.645 0.776025 0.62963 -0.37037 -0.341463 -0.401501 0.046423 [0.9] 0.130436
In [155]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_xg2 = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_xg2 = ROC_xg2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg2_roc = ROC_xg2.predict(data_orig_test_pred)
fair_xg2_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg2_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [156]:
fair_xg2_roc
Out[156]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.61 0.648649 1.002987 0.001268 -0.088669 0.019512 0.125317 [0.7499999999999997] 0.422874
In [ ]:
 

4. RANDOM FOREST CLASSIFIER MODEL WITH OUT HYPER-PARAMETER TUNING

In [157]:
#Creating the classifier
rf_model2 = RandomForestClassifier(random_state=40)
model_rf2=rf_model2
In [158]:
mdl_rf2 = model_rf2.fit(data_orig_train.features, data_orig_train.labels.ravel())
In [159]:
from sklearn.metrics import confusion_matrix
conf_mat_rf2 = confusion_matrix(data_orig_test.labels.ravel(), model_rf2.predict(data_orig_test.features))
conf_mat_rf2
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), model_rf2.predict(data_orig_test.features)))
Out[159]:
array([[ 27,  36],
       [ 21, 116]], dtype=int64)
0.715
In [160]:
unique, counts = np.unique(data_orig_test.labels.ravel(), return_counts=True)
dict(zip(unique, counts))
Out[160]:
{0.0: 63, 1.0: 137}

4.a. Model Explainability/interpretability

4.a.1 Using SHAP (SHapley Additive exPlanations)

In [161]:
import shap
rf_shap_values_t2 = shap.KernelExplainer(mdl_rf2.predict,data_orig_train.features)
Using 800 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.

Test data interpretation

In [162]:
rf_explainer2 = shap.KernelExplainer(mdl_rf2.predict, data_orig_test.features)
rf_shap_values2 = rf_explainer2.shap_values(data_orig_test.features,nsamples=10)
#https://towardsdatascience.com/explain-any-models-with-the-shap-values-use-the-kernelexplainer-79de9464897a
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [163]:
rf_shap_values2
Out[163]:
array([[ 0.        ,  0.        ,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        ,  0.        , -0.3175    , ..., -0.1725    ,
         0.        ,  0.        ],
       [ 0.        ,  0.19      ,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       ...,
       [ 0.        ,  0.        ,  0.015     , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        ,  0.06666667,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        , -0.31      ,  0.        , ..., -0.255     ,
         0.        ,  0.        ]])
In [164]:
rf_explainer2.expected_value
rf_shap_values2
Out[164]:
0.76
Out[164]:
array([[ 0.        ,  0.        ,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        ,  0.        , -0.3175    , ..., -0.1725    ,
         0.        ,  0.        ],
       [ 0.        ,  0.19      ,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       ...,
       [ 0.        ,  0.        ,  0.015     , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        ,  0.06666667,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        , -0.31      ,  0.        , ..., -0.255     ,
         0.        ,  0.        ]])
In [165]:
shap.initjs()
shap.force_plot(rf_explainer2.expected_value,rf_shap_values2[0,:],  data_orig_test.features[0],data_orig_test.feature_names,link='logit')
#https://github.com/slundberg/shap
#https://github.com/slundberg/shap/issues/279
Out[165]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [166]:
shap.initjs()
shap.force_plot(rf_explainer2.expected_value,rf_shap_values2[1,:], data_orig_test.features[1],data_orig_test.feature_names,link='logit')
Out[166]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [167]:
shap.initjs()
shap.force_plot(rf_explainer2.expected_value,rf_shap_values2[2,:], data_orig_test.features[2],data_orig_test.feature_names,link='logit')
Out[167]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [168]:
data_orig_test.feature_names
Out[168]:
['Gender',
 'Age',
 'Marital_Status',
 'CurrentAcc_None',
 'CurrentAcc_LT200',
 'Savings_LT500',
 'CreditHistory_none/paid',
 'Debtors_co-applicant',
 'Job_unskilled-resident',
 'NumMonths',
 'Telephone',
 'Purpose_education',
 'Purpose_furniture/equip',
 'CreditAmount',
 'Foreignworker',
 'Debtors_guarantor']
In [169]:
shap.force_plot(rf_explainer2.expected_value,
                rf_shap_values2, data_orig_test.features[:,:],feature_names = data_orig_test.feature_names)
Out[169]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [170]:
p = shap.summary_plot(rf_shap_values2, data_orig_test.features, feature_names=data_orig_test.feature_names,plot_type="bar") 
display(p)
None

Variables with higher impact are displayed at the top.

In [171]:
shap.plots._waterfall.waterfall_legacy(rf_explainer2.expected_value, rf_shap_values2[0,:],feature_names=data_orig_test.feature_names)

Interpretation of graph: https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html

f(x)- model output impacted by features; E(f(x))- expected output.

One the fundemental properties of Shapley values is that they always sum up to the difference between the game outcome when all players are present and the game outcome when no players are present. For machine learning models this means that SHAP values of all the input features will always sum up to the difference between baseline (expected) model output and the current model output for the prediction being explained.

In [172]:
shap.plots._waterfall.waterfall_legacy(rf_explainer2.expected_value, rf_shap_values2[1],feature_names=data_orig_test.feature_names)

4.a.2 Using ELI5

In [173]:
#!pip install eli5
import eli5
from eli5.sklearn import PermutationImportance
In [174]:
perm_rf2 = PermutationImportance(mdl_rf2).fit(data_orig_test.features, data_orig_test.labels.ravel())
In [175]:
data_orig_test.labels[:10,:].ravel()
Out[175]:
array([1., 0., 1., 0., 1., 0., 1., 1., 1., 1.])

Feature Importance

In [176]:
perm_imp_11=eli5.show_weights(perm_rf2,feature_names = data_orig_test.feature_names)
perm_imp_11
plt.show()
Out[176]:
Weight Feature
0.0420 ± 0.0185 CreditHistory_none/paid
0.0370 ± 0.0233 CreditAmount
0.0200 ± 0.0200 NumMonths
0.0190 ± 0.0279 Savings_LT500
0.0170 ± 0.0136 Purpose_furniture/equip
0.0140 ± 0.0319 CurrentAcc_LT200
0.0110 ± 0.0075 Debtors_guarantor
0.0090 ± 0.0075 Purpose_education
0.0070 ± 0.0185 CurrentAcc_None
0.0050 ± 0.0063 Foreignworker
0.0040 ± 0.0040 Debtors_co-applicant
0.0010 ± 0.0133 Telephone
-0.0060 ± 0.0117 Marital_Status
-0.0060 ± 0.0117 Age
-0.0120 ± 0.0102 Job_unskilled-resident
-0.0190 ± 0.0133 Gender

Explaining individual predictions

In [177]:
show_prediction(mdl_rf2, data_orig_test.features[0], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[177]:

y=1.0 (probability 0.660) top features

Contribution? Feature Value
+0.707 <BIAS> 1.000
+0.161 CurrentAcc_LT200 0.000
+0.022 CreditAmount 0.308
+0.013 Gender 1.000
+0.007 Debtors_co-applicant 0.000
+0.003 Purpose_furniture/equip 0.000
+0.003 Purpose_education 0.000
+0.002 Marital_Status 1.000
+0.002 Job_unskilled-resident 0.000
-0.000 Telephone 0.000
-0.001 Debtors_guarantor 0.000
-0.002 Foreignworker 1.000
-0.012 CreditHistory_none/paid 1.000
-0.027 Age 0.000
-0.031 Savings_LT500 1.000
-0.080 CurrentAcc_None 0.000
-0.107 NumMonths 36.000
In [178]:
from eli5 import show_prediction
show_prediction(mdl_rf2, data_orig_test.features[1], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[178]:

y=0.0 (probability 0.580) top features

Contribution? Feature Value
+0.293 <BIAS> 1.000
+0.085 CurrentAcc_LT200 1.000
+0.074 NumMonths 36.000
+0.058 CurrentAcc_None 0.000
+0.051 Savings_LT500 1.000
+0.032 CreditHistory_none/paid 1.000
+0.026 Telephone 0.000
+0.024 Debtors_guarantor 0.000
+0.017 Marital_Status 0.000
+0.009 Purpose_education 0.000
+0.007 Job_unskilled-resident 0.000
+0.006 Foreignworker 1.000
+0.000 Debtors_co-applicant 0.000
-0.016 Gender 1.000
-0.020 Age 1.000
-0.030 Purpose_furniture/equip 0.000
-0.037 CreditAmount 0.113

4.b. Measuring fairness

Of Baseline model

In [179]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(mdl_rf2, X_test, y_test)
In [180]:
fair = get_fair_metrics_and_plot(filename, data_orig_test, mdl_rf2)
fair
Computing fairness of the model.
Out[180]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.0000 0.000000 0.000000 0.000000 0.00000 1.000 0.000000
Gender 0.715 0.802768 0.8088 -0.153222 -0.198933 -0.168697 0.06621 0.771 0.159803
In [181]:
type(data_orig_train)
Out[181]:
aif360.datasets.binary_label_dataset.BinaryLabelDataset

PRE PROCESSING

In [182]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_rf2 = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_rf2_rw = RW_rf2.fit_transform(data_orig_train)

#train and save model
rf2_transf_rw = model_rf2.fit(data_transf_train_rf2_rw.features,
                     data_transf_train_rf2_rw.labels.ravel())

data_transf_test_rf2_rw = RW_rf2.transform(data_orig_test)
fair_rf2_rw = get_fair_metrics_and_plot(filename, data_transf_test_rf2_rw, rf2_transf_rw, plot=False)
Computing fairness of the model.
In [183]:
fair_rf_rw
Out[183]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.0000 1.000000 0.000000 0.000000 0.000000 0.000000 1.00 0.000000
Gender 0.700204 0.8138 0.969109 -0.029266 -0.038364 -0.074567 -0.165529 0.94 0.071111
In [184]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR_rf2 = DisparateImpactRemover()
data_transf_train_rf2_dir = DIR_rf2.fit_transform(data_orig_train)

# Train and save the model
rf2_transf_dir = model_rf2.fit(data_transf_train_rf2_dir.features,data_transf_train_rf2_dir.labels.ravel())
In [185]:
fair_dir_rf2_dir = get_fair_metrics_and_plot(filename,data_orig_test, rf2_transf_dir, plot=False)
fair_dir_rf2_dir
Computing fairness of the model.
Out[185]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.00000 0.00000 0.000000 0.00000 1.000 0.000000
Gender 0.715 0.804124 0.795207 -0.16692 -0.20935 -0.183906 0.06621 0.774 0.154793
In [186]:
conf_mat_rf2_dir = confusion_matrix(data_orig_test.labels.ravel(), rf2_transf_dir.predict(data_orig_test.features))
conf_mat_rf2_dir
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), rf2_transf_dir.predict(data_orig_test.features)))
Out[186]:
array([[ 26,  37],
       [ 20, 117]], dtype=int64)
0.715

INPROCESSING

In [187]:
#!pip install --user --upgrade tensorflow==1.15.0
#2.2.0
#!pip uninstall tensorflow
In [188]:
#!pip install "tensorflow==1.15"
#!pip install --upgrade tensorflow-hub
In [189]:
#%tensorflow_version 1.15
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [190]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [191]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [192]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope1',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_rf2_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
#train and save the model
        debiased_model_rf2_ad.fit(data_orig_train)
        fair_rf2_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_rf2_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.954649; batch adversarial loss: 0.638520
epoch 1; iter: 0; batch classifier loss: 0.785396; batch adversarial loss: 0.622496
epoch 2; iter: 0; batch classifier loss: 0.860919; batch adversarial loss: 0.744553
epoch 3; iter: 0; batch classifier loss: 0.685736; batch adversarial loss: 0.572103
epoch 4; iter: 0; batch classifier loss: 0.648918; batch adversarial loss: 0.603511
epoch 5; iter: 0; batch classifier loss: 0.728703; batch adversarial loss: 0.656826
epoch 6; iter: 0; batch classifier loss: 0.710048; batch adversarial loss: 0.627021
epoch 7; iter: 0; batch classifier loss: 0.628043; batch adversarial loss: 0.625900
epoch 8; iter: 0; batch classifier loss: 0.821251; batch adversarial loss: 0.598613
epoch 9; iter: 0; batch classifier loss: 0.638662; batch adversarial loss: 0.619882
Out[192]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x1595ba6e988>
Computing fairness of the model.
In [193]:
fair_rf2_ad
Out[193]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.00000
Gender 0.685 0.809668 0.938786 -0.060375 -0.038364 -0.086105 -0.101725 [0.965] 0.07289
In [194]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_pr_rf2 = PrejudiceRemover()

# Train and save the model
debiased_model_pr_rf2.fit(data_orig_train)

fair_rf2_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_pr_rf2, plot=False, model_aif=True)
fair_rf2_pr
Out[194]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x1595bdc9ac8>
Computing fairness of the model.
Out[194]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.715 0.813115 0.817399 -0.161339 -0.143039 -0.22075 -0.035261 [0.8730000000000001] 0.119956
#
In [195]:
y_pred = debiased_model_pr_rf2.predict(data_orig_test)


data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [196]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = mdl_rf2.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = mdl_rf2.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [197]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_rf2 = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_rf2 = EOPP_rf2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf2_eopp = EOPP_rf2.predict(data_orig_test_pred)
fair_rf2_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf2_eopp, pred_is_dataset=True)
Computing fairness of the model.
In [198]:
fair_rf2_eo
Out[198]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.00000 1 0.000000
Gender 0.64 0.729323 1.006698 0.004313 -0.001016 -0.029739 -0.03653 [0.6789999999999998] 0.271871
In [199]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_rf2 = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=42)

CPP_rf2 = CPP_rf2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf2_cpp = CPP_rf2.predict(data_orig_test_pred)
fair_rf2_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf2_cpp, pred_is_dataset=True)
Computing fairness of the model.
In [200]:
fair_rf2_ceo
Out[200]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.665 0.789969 0.666667 -0.333333 -0.268293 -0.403377 -0.027651 [0.919] 0.113926
In [201]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_rf2 = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_rf2 = ROC_rf2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf2_roc = ROC_rf2.predict(data_orig_test_pred)
fair_rf2_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf2_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [202]:
fair_rf2_roc
Out[202]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.745 0.823529 1.066599 0.049721 -0.021341 0.047022 0.005835 [0.7569999999999999] 0.140459

5. KNN

In [203]:
from sklearn import neighbors
n_neighbors = 15
knn = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
In [204]:
knn.fit(data_orig_train.features, data_orig_train.labels.ravel())
Out[204]:
KNeighborsClassifier(n_neighbors=15, weights='distance')
In [205]:
conf_mat_knn = confusion_matrix(data_orig_test.labels.ravel(), knn.predict(data_orig_test.features))
conf_mat_knn
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), knn.predict(data_orig_test.features)))
Out[205]:
array([[ 25,  38],
       [ 22, 115]], dtype=int64)
0.7

5.a. Model Explainability/interpretability

5.a.1 Using SHAP (SHapley Additive exPlanations)

In [206]:
knn_explainer = shap.KernelExplainer(knn.predict, data_orig_test.features)
knn_shap_values = knn_explainer.shap_values(data_orig_test.features,nsamples=10)
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [207]:
#shap.dependence_plot(0, knn_shap_values, data_orig_test.features)
In [208]:
# plot the SHAP values for the 0th observation 
shap.force_plot(knn_explainer.expected_value,knn_shap_values[0,:],  data_orig_test.features[0],data_orig_test.feature_names,link='logit') 
Out[208]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [209]:
# plot the SHAP values for the 1st observation 
shap.force_plot(knn_explainer.expected_value,knn_shap_values[1,:],  data_orig_test.features[1],data_orig_test.feature_names,link='logit') 
Out[209]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [210]:
shap.force_plot(knn_explainer.expected_value, knn_shap_values,  data_orig_test.feature_names,link='logit')
Out[210]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [211]:
shap.summary_plot(knn_shap_values, data_orig_test.features,feature_names=data_orig_test.feature_names, plot_type="violin")

Feature Importance

perm_imp_11=eli5.show_weights(knn,feature_names = data_orig_test.feature_names) perm_imp_11 plt.show()

Explaining individual predictions

In [212]:
from eli5 import show_prediction
show_prediction(knn, data_orig_test.features[1], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[212]:
Error: estimator KNeighborsClassifier(n_neighbors=15, weights='distance') is not supported

5.b. Measuring fairness

Of Baseline model

In [213]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(knn, X_test, y_test)
In [214]:
fair = get_fair_metrics_and_plot(filename, data_orig_test, knn)
fair
Computing fairness of the model.
Out[214]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.0 1.000000 1.000000 0.000000 0.000000 0.000000 0.00000 1.000 0.000000
Gender 0.7 0.793103 0.957063 -0.033232 -0.118902 -0.003297 0.07103 0.786 0.166924

PRE PROCESSING

In [215]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_knn = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_knn = RW_knn.fit_transform(data_orig_train)

# Train and save the model
knn_transf_rw = knn.fit(data_transf_train_knn.features,
                     data_transf_train_knn.labels.ravel())

data_transf_test_knn_rw = RW_knn.transform(data_orig_test)
fair_knn_rw = get_fair_metrics_and_plot(filename, data_transf_test_knn_rw, knn_transf_rw, plot=False)
Computing fairness of the model.
In [216]:
fair_knn_rw
Out[216]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.692766 0.784773 0.977849 -0.016865 -0.118902 -0.003297 0.025218 0.786 0.166924
In [217]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR = DisparateImpactRemover()
data_transf_train_knn_dir = DIR.fit_transform(data_orig_train)
# Train and save the model
knn_transf_dir = knn.fit(data_transf_train_knn_dir.features,
                     data_transf_train_knn_dir.labels.ravel())
In [218]:
fair_knn_dir = get_fair_metrics_and_plot(filename, data_orig_test, knn_transf_dir, plot=False)
fair_knn_dir
Computing fairness of the model.
Out[218]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.68 0.785235 0.864299 -0.113394 -0.174543 -0.091117 0.069001 0.819 0.158979

INPROCESSING

In [219]:
#!pip install tensorflow
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [220]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [221]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [222]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope4',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_knn_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
        debiased_model_knn_ad.fit(data_orig_train)
        fair_knn_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_knn_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 1.591558; batch adversarial loss: 0.701906
epoch 1; iter: 0; batch classifier loss: 1.313166; batch adversarial loss: 0.706221
epoch 2; iter: 0; batch classifier loss: 0.788168; batch adversarial loss: 0.698220
epoch 3; iter: 0; batch classifier loss: 0.666918; batch adversarial loss: 0.692705
epoch 4; iter: 0; batch classifier loss: 0.775418; batch adversarial loss: 0.685819
epoch 5; iter: 0; batch classifier loss: 0.734718; batch adversarial loss: 0.693009
epoch 6; iter: 0; batch classifier loss: 0.681075; batch adversarial loss: 0.689342
epoch 7; iter: 0; batch classifier loss: 0.715226; batch adversarial loss: 0.693229
epoch 8; iter: 0; batch classifier loss: 0.654493; batch adversarial loss: 0.683225
epoch 9; iter: 0; batch classifier loss: 0.687141; batch adversarial loss: 0.682973
Out[222]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x1595df46888>
Computing fairness of the model.
In [223]:
fair_knn_ad
Out[223]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.68 0.806061 0.895019 -0.104262 -0.097561 -0.115704 -0.057839 [0.959] 0.078053
In [224]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_knn_pr = PrejudiceRemover()

# Train and save the model
debiased_model_knn_pr.fit(data_orig_train)

fair_knn_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_knn_pr, plot=False, model_aif=True)
fair_knn_pr
Out[224]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x1595e1e83c8>
Computing fairness of the model.
Out[224]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.715 0.813115 0.817399 -0.161339 -0.143039 -0.22075 -0.035261 [0.8730000000000001] 0.119956
#
In [225]:
y_pred = debiased_model_knn_pr.predict(data_orig_test)

data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [226]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = knn.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = knn.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [227]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_knn = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_knn = EOPP_knn.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_knn_eop = EOPP_knn.predict(data_orig_test_pred)
fair_knn_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_knn_eop, pred_is_dataset=True)
Computing fairness of the model.
In [228]:
fair_knn_eo
Out[228]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.575 0.697509 1.004233 0.003044 0.023374 -0.012159 -0.074835 [0.6889999999999997] 0.273664
In [229]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_knn = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=40)

CPP_knn = CPP_knn.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_knn_cp = CPP_knn.predict(data_orig_test_pred)
fair_knn_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_knn_cp, pred_is_dataset=True)
Computing fairness of the model.
In [230]:
fair_knn_ceo
Out[230]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.65 0.782609 0.722222 -0.277778 -0.268293 -0.287992 0.027905 [0.924] 0.114693
In [231]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_knn = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_knn = ROC_knn.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_knn_roc = ROC_knn.predict(data_orig_test_pred) 
fair_knn_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_knn_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [232]:
fair_knn_roc
Out[232]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.00000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.675 0.749035 1.044613 0.02689 -0.105437 0.084974 0.138255 [0.8390000000000002] 0.265299

6. Logistic Regression

In [233]:
from sklearn.linear_model import LogisticRegression

lr = LogisticRegression()
In [234]:
lr.fit(data_orig_train.features, data_orig_train.labels.ravel())
Out[234]:
LogisticRegression()
In [235]:
conf_mat_lr = confusion_matrix(data_orig_test.labels.ravel(), lr.predict(data_orig_test.features))
conf_mat_lr
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), lr.predict(data_orig_test.features)))
Out[235]:
array([[ 21,  42],
       [ 11, 126]], dtype=int64)
0.735

6.a. Model Explainability/interpretability

6.a.1 Using SHAP (SHapley Additive exPlanations)

In [236]:
lr_explainer = shap.KernelExplainer(lr.predict, data_orig_test.features)
lr_shap_values = lr_explainer.shap_values(data_orig_test.features,nsamples=10)
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [237]:
# plot the SHAP values for the 0th observation 
shap.force_plot(lr_explainer.expected_value,lr_shap_values[0,:],  data_orig_test.features[0],data_orig_test.feature_names,link='logit') 
Out[237]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [238]:
# plot the SHAP values for the 1st observation 
shap.force_plot(lr_explainer.expected_value,lr_shap_values[1,:],  data_orig_test.features[1],data_orig_test.feature_names,link='logit') 
Out[238]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [239]:
shap.force_plot(lr_explainer.expected_value, lr_shap_values,  data_orig_test.feature_names,link='logit')
Out[239]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [240]:
shap.summary_plot(lr_shap_values, data_orig_test.features,feature_names=data_orig_test.feature_names, plot_type="violin")

Feature Importance

perm_imp_11=eli5.show_weights(knn,feature_names = data_orig_test.feature_names) perm_imp_11 plt.show()

Explaining individual predictions

In [241]:
from eli5 import show_prediction
show_prediction(lr, data_orig_test.features[1], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[241]:

y=0.0 (probability 0.606, score -0.432) top features

Contribution? Feature Value
+1.278 NumMonths 36.000
+0.821 Savings_LT500 1.000
+0.741 CurrentAcc_LT200 1.000
+0.681 Foreignworker 1.000
+0.572 CreditHistory_none/paid 1.000
+0.066 CreditAmount 0.113
-0.397 Gender 1.000
-0.444 Age 1.000
-2.886 <BIAS> 1.000

6.b. Measuring fairness

Of Baseline model

In [242]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(lr, X_test, y_test)
In [243]:
fair_lr = get_fair_metrics_and_plot(filename, data_orig_test, lr)
fair_lr
Computing fairness of the model.
Out[243]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.00000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.735 0.82623 0.817399 -0.161339 -0.163872 -0.211167 -0.007864 0.873 0.107953

PRE PROCESSING

In [244]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_lr = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_lr = RW_lr.fit_transform(data_orig_train)

# Train and save the model
lr_transf_rw = lr.fit(data_transf_train_knn.features,
                     data_transf_train_knn.labels.ravel())

data_transf_test_lr_rw = RW_lr.transform(data_orig_test)
fair_lr_rw = get_fair_metrics_and_plot(filename, data_transf_test_lr_rw, lr_transf_rw, plot=False)
Computing fairness of the model.
In [245]:
fair_lr_rw
Out[245]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.000000 0.000000 0.000000 0.00000 1.000 0.000000
Gender 0.718017 0.812975 0.850475 -0.130523 -0.163872 -0.211167 -0.05298 0.873 0.107953
In [246]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR = DisparateImpactRemover()
data_transf_train_lr_dir = DIR.fit_transform(data_orig_train)
# Train and save the model
lr_transf_dir = lr.fit(data_transf_train_lr_dir.features,
                     data_transf_train_lr_dir.labels.ravel())
In [247]:
fair_lr_dir = get_fair_metrics_and_plot(filename, data_orig_test, lr_transf_dir, plot=False)
fair_lr_dir
Computing fairness of the model.
Out[247]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.745 0.831683 0.830271 -0.147641 -0.163872 -0.191167 0.005835 0.868 0.106743

INPROCESSING

In [248]:
#!pip install tensorflow
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [249]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [250]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [251]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope5',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_lr_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
        debiased_model_lr_ad.fit(data_orig_train)
        fair_lr_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_lr_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.784875; batch adversarial loss: 0.731664
epoch 1; iter: 0; batch classifier loss: 0.835361; batch adversarial loss: 0.717697
epoch 2; iter: 0; batch classifier loss: 0.636545; batch adversarial loss: 0.734092
epoch 3; iter: 0; batch classifier loss: 0.642702; batch adversarial loss: 0.758198
epoch 4; iter: 0; batch classifier loss: 0.756227; batch adversarial loss: 0.741437
epoch 5; iter: 0; batch classifier loss: 0.659754; batch adversarial loss: 0.755334
epoch 6; iter: 0; batch classifier loss: 0.693031; batch adversarial loss: 0.728637
epoch 7; iter: 0; batch classifier loss: 0.670030; batch adversarial loss: 0.738602
epoch 8; iter: 0; batch classifier loss: 0.516830; batch adversarial loss: 0.737500
epoch 9; iter: 0; batch classifier loss: 0.580112; batch adversarial loss: 0.733697
Out[251]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x159624367c8>
Computing fairness of the model.
In [252]:
fair_lr_ad
Out[252]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.00000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.685 0.797428 0.78107 -0.202435 -0.143039 -0.28075 -0.076357 [0.91] 0.122724
In [253]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_lr_pr = PrejudiceRemover()

# Train and save the model
debiased_model_lr_pr.fit(data_orig_train)

fair_lr_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_lr_pr, plot=False, model_aif=True)
fair_lr_pr
Out[253]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x15962269708>
Computing fairness of the model.
Out[253]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.715 0.813115 0.817399 -0.161339 -0.143039 -0.22075 -0.035261 [0.8730000000000001] 0.119956
#
In [254]:
y_pred = debiased_model_lr_pr.predict(data_orig_test)

data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [255]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = lr.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = lr.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [256]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_lr = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_lr = EOPP_lr.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_lr_eop = EOPP_lr.predict(data_orig_test_pred)
fair_lr_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_lr_eop, pred_is_dataset=True)
Computing fairness of the model.
In [257]:
fair_lr_eo
Out[257]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.73 0.815068 1.004916 0.003805 -0.021341 -0.03144 -0.040081 [0.802] 0.142752
In [258]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_lr = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=40)

CPP_lr = CPP_lr.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_lr_cp = CPP_lr.predict(data_orig_test_pred)
fair_lr_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_lr_cp, pred_is_dataset=True)
Computing fairness of the model.
In [259]:
fair_lr_ceo
Out[259]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.00000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.71 0.815287 0.76409 -0.222983 -0.184705 -0.291583 -0.042111 [0.888] 0.100714
In [260]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_lr = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_lr = ROC_lr.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_lr_roc = ROC_lr.predict(data_orig_test_pred) 
fair_lr_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_lr_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [261]:
fair_lr_roc
Out[261]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.00000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.00000
Gender 0.765 0.83274 1.039886 0.028412 -0.035315 0.003112 0.007864 [0.814] 0.14642
In [ ]:
 

7. SVM

In [262]:
from sklearn.svm import SVC
#gs = grid_search_cv.best_estimator_
svm = SVC(C=0.85, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
    decision_function_shape='ovr', degree=3, gamma='scale', kernel='linear',
    max_iter=-1, random_state=42, shrinking=True, tol=0.001, probability=True,
    verbose=False)
svm.fit(data_orig_train.features, data_orig_train.labels.ravel())
Out[262]:
SVC(C=0.85, kernel='linear', probability=True, random_state=42)
In [263]:
conf_mat_svm = confusion_matrix(data_orig_test.labels.ravel(), svm.predict(data_orig_test.features))
conf_mat_svm
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), svm.predict(data_orig_test.features)))
Out[263]:
array([[ 20,  43],
       [  9, 128]], dtype=int64)
0.74

7.a. Model Explainability/interpretability

7.a.1 Using SHAP (SHapley Additive exPlanations)

In [264]:
svm_explainer = shap.KernelExplainer(svm.predict, data_orig_test.features)
svm_shap_values = svm_explainer.shap_values(data_orig_test.features,nsamples=10)
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [265]:
# plot the SHAP values for the 0th observation 
shap.force_plot(svm_explainer.expected_value,svm_shap_values[0,:],  data_orig_test.features[0],data_orig_test.feature_names,link='logit') 
Out[265]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [266]:
# plot the SHAP values for the 1st observation 
shap.force_plot(svm_explainer.expected_value,svm_shap_values[1,:],  data_orig_test.features[1],data_orig_test.feature_names,link='logit') 
Out[266]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [267]:
shap.force_plot(svm_explainer.expected_value, svm_shap_values,  data_orig_test.feature_names,link='logit')
Out[267]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [268]:
shap.summary_plot(svm_shap_values, data_orig_test.features,feature_names=data_orig_test.feature_names, plot_type="violin")

Feature Importance

perm_imp_11=eli5.show_weights(knn,feature_names = data_orig_test.feature_names) perm_imp_11 plt.show()

Explaining individual predictions

In [269]:
from eli5 import show_prediction
show_prediction(svm, data_orig_test.features[1], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[269]:

y=0.0 (probability 0.580, score -0.426) top features

Contribution? Feature Value
+1.198 NumMonths 36.000
+0.919 CurrentAcc_LT200 1.000
+0.814 Savings_LT500 1.000
+0.477 CreditHistory_none/paid 1.000
+0.441 Foreignworker 1.000
+0.029 CreditAmount 0.113
-0.282 Age 1.000
-0.390 Gender 1.000
-2.782 <BIAS> 1.000

7.b. Measuring fairness

Of Baseline model

In [270]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(svm, X_test, y_test)
In [271]:
fair_svm = get_fair_metrics_and_plot(filename, data_orig_test, svm)
fair_svm
Computing fairness of the model.
Out[271]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.00000 0.000000 0.00000 0.000000 1.000 0.000000
Gender 0.74 0.831169 0.825558 -0.15652 -0.149898 -0.21418 -0.026383 0.885 0.097743

PRE PROCESSING

In [272]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_svm = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_svm = RW_svm.fit_transform(data_orig_train)

# Train and save the model
svm_transf_rw = svm.fit(data_transf_train_knn.features,
                     data_transf_train_knn.labels.ravel())

data_transf_test_svm_rw = RW_svm.transform(data_orig_test)
fair_svm_rw = get_fair_metrics_and_plot(filename, data_transf_test_svm_rw, svm_transf_rw, plot=False)
Computing fairness of the model.
In [273]:
fair_svm_rw
Out[273]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.00000 0.000000 0.00000 0.000000 1.000 0.000000
Gender 0.722573 0.817895 0.859441 -0.12468 -0.149898 -0.21418 -0.074233 0.885 0.097743
In [274]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR = DisparateImpactRemover()
data_transf_train_svm_dir = DIR.fit_transform(data_orig_train)
# Train and save the model
svm_transf_dir = svm.fit(data_transf_train_svm_dir.features,
                     data_transf_train_svm_dir.labels.ravel())
In [275]:
fair_svm_dir = get_fair_metrics_and_plot(filename, data_orig_test, svm_transf_dir, plot=False)
fair_svm_dir
Computing fairness of the model.
Out[275]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.00000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.735 0.82623 0.817399 -0.161339 -0.163872 -0.211167 -0.007864 0.882 0.107953

INPROCESSING

In [276]:
#!pip install tensorflow
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [277]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [278]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [279]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope6',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_svm_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
        debiased_model_svm_ad.fit(data_orig_train)
        fair_svm_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_svm_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.784875; batch adversarial loss: 0.731664
epoch 1; iter: 0; batch classifier loss: 0.835361; batch adversarial loss: 0.717697
epoch 2; iter: 0; batch classifier loss: 0.636545; batch adversarial loss: 0.734092
epoch 3; iter: 0; batch classifier loss: 0.642702; batch adversarial loss: 0.758198
epoch 4; iter: 0; batch classifier loss: 0.756227; batch adversarial loss: 0.741437
epoch 5; iter: 0; batch classifier loss: 0.659754; batch adversarial loss: 0.755334
epoch 6; iter: 0; batch classifier loss: 0.693031; batch adversarial loss: 0.728637
epoch 7; iter: 0; batch classifier loss: 0.670030; batch adversarial loss: 0.738602
epoch 8; iter: 0; batch classifier loss: 0.516830; batch adversarial loss: 0.737500
epoch 9; iter: 0; batch classifier loss: 0.580112; batch adversarial loss: 0.733697
Out[279]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x159655fd908>
Computing fairness of the model.
In [280]:
fair_svm_ad
Out[280]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.00000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.685 0.797428 0.78107 -0.202435 -0.143039 -0.28075 -0.076357 [0.91] 0.122724
In [281]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_svm_pr = PrejudiceRemover()

# Train and save the model
debiased_model_svm_pr.fit(data_orig_train)

fair_svm_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_svm_pr, plot=False, model_aif=True)
fair_svm_pr
Out[281]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x1596574f188>
Computing fairness of the model.
Out[281]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.715 0.813115 0.817399 -0.161339 -0.143039 -0.22075 -0.035261 [0.8730000000000001] 0.119956
#
In [282]:
y_pred = debiased_model_svm_pr.predict(data_orig_test)

data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [283]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = svm.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = svm.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [284]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_svm = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_svm = EOPP_svm.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_svm_eop = EOPP_svm.predict(data_orig_test_pred)
fair_svm_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_svm_eop, pred_is_dataset=True)
Computing fairness of the model.
In [285]:
fair_svm_eo
Out[285]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.0 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.7 0.798658 1.016777 0.013445 0.048272 -0.056633 -0.131913 [0.8210000000000001] 0.146602
In [286]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_svm = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=40)

CPP_svm = CPP_svm.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_svm_cp = CPP_svm.predict(data_orig_test_pred)
fair_svm_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_svm_cp, pred_is_dataset=True)
Computing fairness of the model.
In [287]:
fair_svm_ceo
Out[287]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.715 0.820189 0.797495 -0.192796 -0.135925 -0.277193 -0.085997 [0.892] 0.090502
In [288]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_svm = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_svm = ROC_svm.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_svm_roc = ROC_svm.predict(data_orig_test_pred) 
fair_svm_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_svm_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [289]:
fair_svm_roc
Out[289]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.00000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.76 0.827338 1.070774 0.04896 -0.014482 0.023528 0.001015 [0.8049999999999999] 0.156882